void audit_domain( struct domain *d)
{
- int ttot=0, ctot=0;
+ /* Per-audit running totals; lowmem/io mapping counts are reported
+  * at the end of phase 1. */
+ int ttot=0, ctot=0, io_mappings=0, lowmem_mappings=0;
void adjust ( struct pfn_info *page, int dir, int adjtype )
{
int count = page->count_info & PGC_count_mask;
if ( l1page->u.inuse.domain != d )
{
- printk("Skip page belowing to other dom %p\n",
+ printk("L2: Skip bizarre page belonging to other dom %p\n",
l1page->u.inuse.domain);
continue;
}
unsigned long l1pfn = pt[i]>>PAGE_SHIFT;
struct pfn_info *l1page = &frame_table[l1pfn];
+ /* Mappings of the first 0x100 frames (presumably the legacy low-memory
+  * region -- TODO confirm page size) are only counted, not audited. */
+ if ( l1pfn < 0x100 )
+ {
+ lowmem_mappings++;
+ continue;
+ }
+
+ /* Frames beyond max_page are not RAM: count them as I/O mappings and
+  * skip, so the frame_table entry computed above is never dereferenced. */
+ if ( l1pfn > max_page )
+ {
+ io_mappings++;
+ continue;
+ }
+
if ( pt[i] & _PAGE_RW )
{
if ( l1page->u.inuse.domain != d )
{
- printk("Skip page belowing to other dom %p\n",
- l1page->u.inuse.domain);
+ printk("Audit %d: [%lx,%x] Skip foreign page dom=%lx pfn=%lx c=%08x t=%08x m2p=%lx\n",
+ d->domain, pfn, i,
+ (unsigned long)l1page->u.inuse.domain,
+ l1pfn,
+ l1page->count_info,
+ l1page->u.inuse.type_info,
+ machine_to_phys_mapping[l1pfn]);
continue;
}
unmap_domain_mem(pt);
break;
- }
-
-
+ }
list_ent = frame_table[pfn].list.next;
}
+ /* Summarise the mappings skipped during phase 1. */
+ if ( io_mappings>0 || lowmem_mappings>0 )
+ printk("Audit %d: Found %d lowmem mappings and %d io mappings\n",
+ d->domain, lowmem_mappings, io_mappings);
+
/* phase 2 */
ctot = ttot = 0;
unsigned long l1pfn = pt[i]>>PAGE_SHIFT;
struct pfn_info *l1page = &frame_table[l1pfn];
- if ( l1page->u.inuse.domain == d)
- adjust( l1page, 1, 0 );
+ /* Mirror the phase-1 filters: only re-adjust pages owned by this
+  * domain that fall inside normal RAM (skip lowmem and I/O frames). */
+ if ( l1page->u.inuse.domain != d) continue;
+ if ( l1pfn < 0x100 ) continue;
+ if ( l1pfn > max_page ) continue;
+
+ adjust( l1page, 1, 0 );
#endif
}
}
for_each_domain ( d )
{
- if ( d->domain > 0 )
- audit_domain(d);
+ /* Audit every domain, including domain 0 (previously excluded by
+  * the d->domain > 0 guard). */
+ audit_domain(d);
}
}
0, bytes);
}
+#if 0 /* This optimisation is dangerous for some uses of this function.
+ Disabled for the moment; the code is kept visible for reference. */
/* Might as well stop the domain as an optimization. */
if ( zero )
domain_pause_by_systemcontroller(d);
+#endif
break;
}
+/* Allocate one page for shadow page tables, keeping m->shadow_page_count
+ * in step with successful allocations only.  Allocation failure is
+ * currently fatal (BUG) after logging the shadow-table statistics. */
static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
{
+ struct pfn_info *page;
- m->shadow_page_count++;
- return alloc_domheap_page(NULL);
+ page = alloc_domheap_page(NULL);
+
+ if( unlikely(page == NULL) )
+ {
+ printk("Couldn't alloc shadow page! count=%d\n",
+ m->shadow_page_count);
+ SH_VLOG("Shadow tables l1=%d l2=%d",
+ perfc_value(shadow_l1_pages),
+ perfc_value(shadow_l2_pages));
+ BUG(); // FIXME: try a shadow flush to free up some memory
+ }
+
+ /* Count only pages actually obtained, so the counter stays accurate
+  * if the BUG() above is ever replaced by a recovery path. */
+ m->shadow_page_count++;
+
+ return page;
}
void unshadow_table( unsigned long gpfn, unsigned int type )
}
else
{
- SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
- mfn, pfn, m->shadow_dirty_bitmap_size, m );
- SH_LOG("dom=%p caf=%08x taf=%08x\n",
- frame_table[mfn].u.inuse.domain,
- frame_table[mfn].count_info,
- frame_table[mfn].u.inuse.type_info );
+ /* Only log the out-of-range mark when mfn is inside RAM, presumably
+  * because frame_table[] entries are only valid below max_page --
+  * TODO confirm against frame_table allocation. */
+ if ( mfn < max_page )
{
- extern void show_trace(unsigned long *esp);
- unsigned long *esp;
- __asm__ __volatile__ ("movl %%esp,%0" : "=r" (esp) : );
- show_trace(esp);
+ SH_LOG("mark_dirty OOR! mfn=%x pfn=%x max=%x (mm %p)",
+ mfn, pfn, m->shadow_dirty_bitmap_size, m );
+ SH_LOG("dom=%p caf=%08x taf=%08x\n",
+ frame_table[mfn].u.inuse.domain,
+ frame_table[mfn].count_info,
+ frame_table[mfn].u.inuse.type_info );
+ {
+ extern void show_trace(unsigned long *esp);
+ unsigned long *esp;
+ /* Capture the current stack pointer and dump a backtrace. */
+ __asm__ __volatile__ ("movl %%esp,%0" : "=r" (esp) : );
+ show_trace(esp);
+ }
}
}